Tensorflow_2.0_Arithmetic_NN.ipynb

No Headings

The table of contents shows headings in notebooks and supported files.

Skip to Main
Jupyter

Tensorflow_2.0_Arithmetic_NN

Last Checkpoint: yesterday
  • File
  • Edit
  • View
  • Run
  • Kernel
  • Settings
  • Help
Kernel status: Idle Executed 2 cellsElapsed time: 24 seconds
[1]:
import random
import pandas as pd
[3]:
d1=[]
d2=[]
d3=[]
d4=[]
for i in range(50000):
x=random.random()
d1.append(x)
y=random.random()
d2.append(y)
z=random.random()
d3.append(z)
d4.append(2*x+3*y+z)
[5]:
df=pd.DataFrame({'x1':d1,'x2':d2,'x3':d3,'x4':d4},index=range(0,50000))
[7]:
X = df.iloc[:,0:3]
Y = df.iloc[:,3]
[9]:
import tensorflow as tf
from sklearn.model_selection import train_test_split
[11]:
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
[15]:
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(9,input_dim=3, activation='relu'),
tf.keras.layers.Dense(6, activation='relu'),
tf.keras.layers.Dense(1, activation='linear')
])

model.compile(optimizer='adam',
loss='mse',
metrics=['accuracy'])
C:\Users\nr802\anaconda3\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
[17]:
model.fit(X_train, y_train, epochs=20)
Epoch 1/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 6s 2ms/step - accuracy: 0.0000e+00 - loss: 2.3078
Epoch 2/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 0.0040
Epoch 3/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 7.7983e-04
Epoch 4/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 8.5066e-05
Epoch 5/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 3.6086e-05
Epoch 6/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 3ms/step - accuracy: 0.0000e+00 - loss: 2.0163e-05
Epoch 7/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.0294e-05
Epoch 8/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 4.7881e-06
Epoch 9/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 2.4368e-06
Epoch 10/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.6454e-06
Epoch 11/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.5711e-06
Epoch 12/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 5s 2ms/step - accuracy: 0.0000e+00 - loss: 2.0516e-06
Epoch 13/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.1975e-06
Epoch 14/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.0248e-06
Epoch 15/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 2.1412e-06
Epoch 16/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 2.1384e-07
Epoch 17/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.4165e-06
Epoch 18/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.2556e-06
Epoch 19/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.6730e-06
Epoch 20/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.0331e-06
[17]:
<keras.src.callbacks.history.History at 0x1cdf1b05ac0>
[19]:
y_pred=model.predict(X_test)
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
[21]:
from sklearn.metrics import r2_score
[23]:
r2_score(y_pred, y_test)
[23]:
0.999999996079316
[25]:
import matplotlib.pyplot as plt
%matplotlib inline
[18]:
metrics_df = pd.DataFrame(model.history.history)
metrics_df.plot()
[18]:
<matplotlib.axes._subplots.AxesSubplot at 0x3cd7882308>
[1]:
import random
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

# Generating synthetic data
d1 = []
d2 = []
d3 = []
d4 = []
for i in range(50000):
x = random.random()
d1.append(x)
y = random.random()
d2.append(y)
z = random.random()
d3.append(z)
d4.append(2*x + 3*y + z) # Target variable

df = pd.DataFrame({'x1': d1, 'x2': d2, 'x3': d3, 'x4': d4}, index=range(0, 50000))

# Features and target
X = df.iloc[:, 0:3] # Features (x1, x2, x3)
Y = df.iloc[:, 3] # Target (x4)

# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)

# Convert to NumPy arrays for TensorFlow compatibility
X_train = X_train.values
X_test = X_test.values
y_train = y_train.values
y_test = y_test.values

# Define the model
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(9, input_dim=3, activation='relu'),
tf.keras.layers.Dense(6, activation='relu'),
tf.keras.layers.Dense(1, activation='linear')
])

# Compile the model
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])

# Train the model
model.fit(X_train, y_train, epochs=20)

# Make predictions
y_pred = model.predict(X_test)

# Evaluate performance using R² score
r2 = r2_score(y_test, y_pred)
print(f'R² score: {r2}')

Epoch 1/20
C:\Users\nr802\anaconda3\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 2s 691us/step - accuracy: 0.0000e+00 - loss: 2.4579
Epoch 2/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 684us/step - accuracy: 0.0000e+00 - loss: 2.2003e-04
Epoch 3/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 699us/step - accuracy: 0.0000e+00 - loss: 7.5260e-05
Epoch 4/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 682us/step - accuracy: 0.0000e+00 - loss: 4.3746e-05
Epoch 5/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 683us/step - accuracy: 0.0000e+00 - loss: 1.7932e-05
Epoch 6/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 686us/step - accuracy: 0.0000e+00 - loss: 8.9144e-06
Epoch 7/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 684us/step - accuracy: 0.0000e+00 - loss: 3.7749e-06
Epoch 8/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 676us/step - accuracy: 0.0000e+00 - loss: 1.6104e-06
Epoch 9/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 661us/step - accuracy: 0.0000e+00 - loss: 9.2002e-07
Epoch 10/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 678us/step - accuracy: 0.0000e+00 - loss: 8.5859e-07
Epoch 11/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 719us/step - accuracy: 0.0000e+00 - loss: 1.8239e-06
Epoch 12/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 670us/step - accuracy: 0.0000e+00 - loss: 1.0040e-06
Epoch 13/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 677us/step - accuracy: 0.0000e+00 - loss: 1.8101e-06
Epoch 14/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 819us/step - accuracy: 0.0000e+00 - loss: 1.8927e-07
Epoch 15/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 748us/step - accuracy: 0.0000e+00 - loss: 5.4305e-07
Epoch 16/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 728us/step - accuracy: 0.0000e+00 - loss: 1.2576e-06
Epoch 17/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 688us/step - accuracy: 0.0000e+00 - loss: 1.0757e-06
Epoch 18/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 790us/step - accuracy: 0.0000e+00 - loss: 1.0126e-06
Epoch 19/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 750us/step - accuracy: 0.0000e+00 - loss: 6.0779e-07
Epoch 20/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 723us/step - accuracy: 0.0000e+00 - loss: 1.5399e-06
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 708us/step
R² score: 0.9999993176162081
[5]:
import random
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

# Step 1: Generate synthetic data
d1, d2, d3, d4 = [], [], [], []
for i in range(50000):
x = random.random()
d1.append(x)
y = random.random()
d2.append(y)
z = random.random()
d3.append(z)
d4.append(2 * x + 3 * y + z) # Target variable based on the formula

# Create a DataFrame
df = pd.DataFrame({'x1': d1, 'x2': d2, 'x3': d3, 'x4': d4})

# Step 2: Define features (X) and target (Y)
X = df.iloc[:, 0:3] # x1, x2, x3
Y = df.iloc[:, 3] # x4

# Step 3: Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)

# Convert Pandas DataFrame to NumPy arrays for TensorFlow compatibility
X_train = X_train.values
X_test = X_test.values
y_train = y_train.values
y_test = y_test.values

# Step 4: Define the model
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(9, input_dim=3, activation='relu'), # Input layer with 3 inputs
tf.keras.layers.Dense(6, activation='relu'), # Hidden layer
tf.keras.layers.Dense(1, activation='linear') # Output layer for regression
])

# Step 5: Compile the model
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])

# Step 6: Train the model
model.fit(X_train, y_train, epochs=20, verbose=1)

# Step 7: Make predictions on the test set
y_pred = model.predict(X_test)

# Step 8: Evaluate the model using R² score
r2 = r2_score(y_test, y_pred)
print(f'R² score: {r2}')
Epoch 1/20
C:\Users\nr802\anaconda3\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 2s 714us/step - accuracy: 0.0000e+00 - loss: 3.0730
Epoch 2/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 686us/step - accuracy: 0.0000e+00 - loss: 2.4699e-04
Epoch 3/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 667us/step - accuracy: 0.0000e+00 - loss: 5.9977e-05
Epoch 4/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 656us/step - accuracy: 0.0000e+00 - loss: 3.8079e-05
Epoch 5/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 660us/step - accuracy: 0.0000e+00 - loss: 2.6635e-05
Epoch 6/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 702us/step - accuracy: 0.0000e+00 - loss: 1.0837e-05
Epoch 7/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 687us/step - accuracy: 0.0000e+00 - loss: 7.4368e-06
Epoch 8/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 689us/step - accuracy: 0.0000e+00 - loss: 3.5923e-06
Epoch 9/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 679us/step - accuracy: 0.0000e+00 - loss: 1.8856e-06
Epoch 10/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 670us/step - accuracy: 0.0000e+00 - loss: 1.6077e-06
Epoch 11/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 702us/step - accuracy: 0.0000e+00 - loss: 1.6218e-06
Epoch 12/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 675us/step - accuracy: 0.0000e+00 - loss: 1.4404e-06
Epoch 13/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 682us/step - accuracy: 0.0000e+00 - loss: 6.6282e-07
Epoch 14/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 688us/step - accuracy: 0.0000e+00 - loss: 1.2724e-06
Epoch 15/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 717us/step - accuracy: 0.0000e+00 - loss: 1.8796e-06
Epoch 16/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 713us/step - accuracy: 0.0000e+00 - loss: 1.8872e-06
Epoch 17/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 691us/step - accuracy: 0.0000e+00 - loss: 2.1026e-06
Epoch 18/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 677us/step - accuracy: 0.0000e+00 - loss: 5.8587e-07
Epoch 19/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 688us/step - accuracy: 0.0000e+00 - loss: 1.4093e-06
Epoch 20/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 709us/step - accuracy: 0.0000e+00 - loss: 1.7018e-06
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 703us/step
R² score: 0.9999999454713913
[9]:
import random
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

# Step 1: Generate synthetic data
d1, d2, d3, d4 = [], [], [], []
for i in range(50000):
x = random.random()
d1.append(x)
y = random.random()
d2.append(y)
z = random.random()
d3.append(z)
d4.append(2 * x + 3 * y + z) # Target variable based on the formula

# Create a DataFrame
df = pd.DataFrame({'x1': d1, 'x2': d2, 'x3': d3, 'x4': d4})

# Print some sample x, y, z values
print("Sample data:")
print(df.head()) # Display first 5 rows of the data

# Save the data to a CSV file for further inspection if needed
df.to_csv('generated_data.csv', index=False)
print("Data saved to 'generated_data.csv'.")

# Step 2: Define features (X) and target (Y)
X = df.iloc[:, 0:3] # x1, x2, x3
Y = df.iloc[:, 3] # x4

# Step 3: Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)

# Convert Pandas DataFrame to NumPy arrays for TensorFlow compatibility
X_train = X_train.values
X_test = X_test.values
y_train = y_train.values
y_test = y_test.values

# Step 4: Define the model
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(9, input_dim=3, activation='relu'), # Input layer with 3 inputs
tf.keras.layers.Dense(6, activation='relu'), # Hidden layer
tf.keras.layers.Dense(1, activation='linear') # Output layer for regression
])

# Step 5: Compile the model
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])

# Step 6: Train the model
model.fit(X_train, y_train, epochs=20, verbose=1)

# Step 7: Make predictions on the test set
y_pred = model.predict(X_test)

# Step 8: Evaluate the model using R² score
r2 = r2_score(y_test, y_pred)
print(f'R² score: {r2}')

Sample data:
         x1        x2        x3        x4
0  0.269617  0.080424  0.718373  1.498878
1  0.563495  0.728264  0.032121  3.343904
2  0.191909  0.747924  0.703519  3.331108
3  0.094050  0.521800  0.547263  2.300764
4  0.864998  0.839702  0.178130  4.427231
Data saved to 'generated_data.csv'.
Epoch 1/20
C:\Users\nr802\anaconda3\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 1ms/step - accuracy: 0.0000e+00 - loss: 3.1564
Epoch 2/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 734us/step - accuracy: 0.0000e+00 - loss: 2.1667e-04
Epoch 3/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 683us/step - accuracy: 0.0000e+00 - loss: 6.2466e-05
Epoch 4/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 671us/step - accuracy: 0.0000e+00 - loss: 2.1148e-05
Epoch 5/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 681us/step - accuracy: 0.0000e+00 - loss: 9.4850e-06
Epoch 6/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 701us/step - accuracy: 0.0000e+00 - loss: 3.1111e-06
Epoch 7/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 680us/step - accuracy: 0.0000e+00 - loss: 2.2540e-06
Epoch 8/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 699us/step - accuracy: 0.0000e+00 - loss: 1.1908e-06
Epoch 9/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 675us/step - accuracy: 0.0000e+00 - loss: 1.1005e-06
Epoch 10/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 697us/step - accuracy: 0.0000e+00 - loss: 2.2073e-06
Epoch 11/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 667us/step - accuracy: 0.0000e+00 - loss: 1.0672e-06
Epoch 12/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 738us/step - accuracy: 0.0000e+00 - loss: 2.2186e-06
Epoch 13/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 760us/step - accuracy: 0.0000e+00 - loss: 1.4514e-07
Epoch 14/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 768us/step - accuracy: 0.0000e+00 - loss: 1.6680e-06
Epoch 15/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 764us/step - accuracy: 0.0000e+00 - loss: 2.1007e-06
Epoch 16/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 786us/step - accuracy: 0.0000e+00 - loss: 1.3550e-06
Epoch 17/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 833us/step - accuracy: 0.0000e+00 - loss: 7.3741e-07
Epoch 18/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 835us/step - accuracy: 0.0000e+00 - loss: 2.2035e-06
Epoch 19/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 800us/step - accuracy: 0.0000e+00 - loss: 2.4538e-06
Epoch 20/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 839us/step - accuracy: 0.0000e+00 - loss: 2.4899e-06
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 788us/step
R² score: 0.9999998611216304
[11]:
import random
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt

# Step 1: Generate synthetic data
d1, d2, d3, d4 = [], [], [], []
for i in range(50000):
x = random.random()
d1.append(x)
y = random.random()
d2.append(y)
z = random.random()
d3.append(z)
d4.append(2 * x + 3 * y + z) # Target variable based on the formula

# Create a DataFrame
df = pd.DataFrame({'x1': d1, 'x2': d2, 'x3': d3, 'x4': d4})

# Step 2: Define features (X) and target (Y)
X = df.iloc[:, 0:3] # x1, x2, x3
Y = df.iloc[:, 3] # x4

# Step 3: Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)

# Convert Pandas DataFrame to NumPy arrays for TensorFlow compatibility
X_train = X_train.values
X_test = X_test.values
y_train = y_train.values
y_test = y_test.values

# Step 4: Define the model
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(9, input_dim=3, activation='relu'), # Input layer with 3 inputs
tf.keras.layers.Dense(6, activation='relu'), # Hidden layer
tf.keras.layers.Dense(1, activation='linear') # Output layer for regression
])

# Step 5: Compile the model
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])

# Step 6: Train the model
model.fit(X_train, y_train, epochs=20, verbose=1)

# Step 7: Make predictions on the test set
y_pred = model.predict(X_test)

# Step 8: Evaluate the model using R² score
r2 = r2_score(y_test, y_pred)
print(f'R² score: {r2}')

# Step 9: Plot graphs
# Scatter plot of actual vs predicted values
plt.figure(figsize=(10, 6))
plt.scatter(y_test, y_pred, alpha=0.5, color='blue')
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw=2, color='red')
plt.title('Actual vs Predicted Values')
plt.xlabel('Actual Values (y_test)')
plt.ylabel('Predicted Values (y_pred)')
plt.grid(True)
plt.show()

# 3D Scatter plot of features x1, x2, x3 against the target
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(df['x1'], df['x2'], df['x3'], c=df['x4'], cmap='viridis', alpha=0.6)
ax.set_title('3D Scatter Plot of Features (x1, x2, x3) and Target (x4)')
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_zlabel('x3')
plt.show()

Epoch 1/20
C:\Users\nr802\anaconda3\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 2s 699us/step - accuracy: 0.0000e+00 - loss: 6.7616
Epoch 2/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 679us/step - accuracy: 0.0000e+00 - loss: 0.0350
Epoch 3/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 679us/step - accuracy: 0.0000e+00 - loss: 9.0078e-05
Epoch 4/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 666us/step - accuracy: 0.0000e+00 - loss: 2.2502e-05
Epoch 5/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 684us/step - accuracy: 0.0000e+00 - loss: 1.0236e-05
Epoch 6/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 694us/step - accuracy: 0.0000e+00 - loss: 3.6448e-06
Epoch 7/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 694us/step - accuracy: 0.0000e+00 - loss: 1.7947e-06
Epoch 8/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 671us/step - accuracy: 0.0000e+00 - loss: 9.5031e-07
Epoch 9/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 687us/step - accuracy: 0.0000e+00 - loss: 5.8944e-07
Epoch 10/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 677us/step - accuracy: 0.0000e+00 - loss: 4.8120e-07
Epoch 11/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 673us/step - accuracy: 0.0000e+00 - loss: 8.9556e-07
Epoch 12/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 691us/step - accuracy: 0.0000e+00 - loss: 7.9028e-07
Epoch 13/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 679us/step - accuracy: 0.0000e+00 - loss: 6.2363e-07
Epoch 14/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 672us/step - accuracy: 0.0000e+00 - loss: 8.8967e-07
Epoch 15/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 695us/step - accuracy: 0.0000e+00 - loss: 9.8036e-07
Epoch 16/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 674us/step - accuracy: 0.0000e+00 - loss: 9.4307e-07
Epoch 17/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 692us/step - accuracy: 0.0000e+00 - loss: 3.2975e-07
Epoch 18/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 676us/step - accuracy: 0.0000e+00 - loss: 3.3105e-07
Epoch 19/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 723us/step - accuracy: 0.0000e+00 - loss: 4.3320e-07
Epoch 20/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 695us/step - accuracy: 0.0000e+00 - loss: 7.4015e-07
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 733us/step
R² score: 0.9999998184931179
C:\Users\nr802\AppData\Local\Temp\ipykernel_5420\4223667725.py:59: UserWarning: color is redundantly defined by the 'color' keyword argument and the fmt string "k--" (-> color='k'). The keyword argument will take precedence.
  plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw=2, color='red')
[13]:
import random
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import plotly.express as px
import plotly.graph_objects as go

# Step 1: Generate synthetic data
d1, d2, d3, d4 = [], [], [], []
for i in range(50000):
x = random.random()
d1.append(x)
y = random.random()
d2.append(y)
z = random.random()
d3.append(z)
d4.append(2 * x + 3 * y + z) # Target variable based on the formula

# Create a DataFrame
df = pd.DataFrame({'x1': d1, 'x2': d2, 'x3': d3, 'x4': d4})

# Step 2: Define features (X) and target (Y)
X = df.iloc[:, 0:3] # x1, x2, x3
Y = df.iloc[:, 3] # x4

# Step 3: Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)

# Convert Pandas DataFrame to NumPy arrays for TensorFlow compatibility
X_train = X_train.values
X_test = X_test.values
y_train = y_train.values
y_test = y_test.values

# Step 4: Define the model
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(9, input_dim=3, activation='relu'), # Input layer with 3 inputs
tf.keras.layers.Dense(6, activation='relu'), # Hidden layer
tf.keras.layers.Dense(1, activation='linear') # Output layer for regression
])

# Step 5: Compile the model
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])

# Step 6: Train the model
model.fit(X_train, y_train, epochs=20, verbose=1)

# Step 7: Make predictions on the test set
y_pred = model.predict(X_test)

# Step 8: Evaluate the model using R² score
r2 = r2_score(y_test, y_pred)
print(f'R² score: {r2}')

# Step 9: Plotly Graphs
# 1. Scatter Plot: Actual vs Predicted Values
scatter_fig = px.scatter(
x=y_test, y=y_pred.flatten(),
labels={'x': 'Actual Values (y_test)', 'y': 'Predicted Values (y_pred)'},
title='Actual vs Predicted Values'
)
scatter_fig.add_trace(go.Scatter(
x=[y_test.min(), y_test.max()],
y=[y_test.min(), y_test.max()],
mode='lines',
line=dict(color='red', dash='dash'),
name='Ideal Fit Line'
))
scatter_fig.show()

# 2. 3D Scatter Plot: Features (x1, x2, x3) vs Target (x4)
scatter_3d_fig = px.scatter_3d(
df, x='x1', y='x2', z='x3', color='x4',
title='3D Scatter Plot of Features and Target',
labels={'x1': 'Feature x1', 'x2': 'Feature x2', 'x3': 'Feature x3', 'x4': 'Target x4'}
)
scatter_3d_fig.update_traces(marker=dict(size=3, opacity=0.7))
scatter_3d_fig.show()

Epoch 1/20
C:\Users\nr802\anaconda3\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 2s 728us/step - accuracy: 0.0000e+00 - loss: 2.5704
Epoch 2/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 697us/step - accuracy: 0.0000e+00 - loss: 1.1223e-04
Epoch 3/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 678us/step - accuracy: 0.0000e+00 - loss: 1.2298e-05
Epoch 4/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 676us/step - accuracy: 0.0000e+00 - loss: 3.5542e-06
Epoch 5/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 673us/step - accuracy: 0.0000e+00 - loss: 1.4461e-06
Epoch 6/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 697us/step - accuracy: 0.0000e+00 - loss: 1.0300e-06
Epoch 7/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 684us/step - accuracy: 0.0000e+00 - loss: 1.0092e-06
Epoch 8/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 727us/step - accuracy: 0.0000e+00 - loss: 1.2825e-06
Epoch 9/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 682us/step - accuracy: 0.0000e+00 - loss: 1.4362e-06
Epoch 10/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 686us/step - accuracy: 0.0000e+00 - loss: 1.5621e-06
Epoch 11/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 674us/step - accuracy: 0.0000e+00 - loss: 2.6883e-06
Epoch 12/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 700us/step - accuracy: 0.0000e+00 - loss: 1.6967e-06
Epoch 13/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 682us/step - accuracy: 0.0000e+00 - loss: 1.8244e-06
Epoch 14/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 691us/step - accuracy: 0.0000e+00 - loss: 3.3055e-06
Epoch 15/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 687us/step - accuracy: 0.0000e+00 - loss: 3.7872e-06
Epoch 16/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 687us/step - accuracy: 0.0000e+00 - loss: 2.6825e-06
Epoch 17/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 713us/step - accuracy: 0.0000e+00 - loss: 1.5076e-06
Epoch 18/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 701us/step - accuracy: 0.0000e+00 - loss: 2.6970e-06
Epoch 19/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 711us/step - accuracy: 0.0000e+00 - loss: 1.9525e-06
Epoch 20/20
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 696us/step - accuracy: 0.0000e+00 - loss: 2.2977e-06
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 654us/step
R² score: 0.999993681133065
2.198062.198082.19812.198122.198142.198162.198182.22.421.8
Ideal Fit LineActual vs Predicted ValuesActual Values (y_test)Predicted Values (y_pred)
plotly-logomark
Feature x1=0.9760245Feature x2=0.4259733Feature x3=0.986342Target x4=4.216310671330042
24Target x43D Scatter Plot of Features and Target
plotly-logomark
[ ]:

-

Variables

Callstack

    Breakpoints

    Source

    9
    1

    Kernel Sources

    Common Tools
    No metadata.
    Advanced Tools
    No metadata.
    Anaconda Assistant
    AI-powered coding, insights and debugging in your notebooks.
    To enable the following extensions, create an account or sign in.
    • Anaconda Assistant
      4.0.15
    • Coming soon!
    • Data Catalogs
    • Panel Deployments
    • Sharing
    Already have an account? Sign In
    For more information, read our Anaconda Assistant documentation.
    • Assistant
    • Open Anaconda Assistant
    • Console
    • Change Kernel…
    • Clear Console Cells
    • Close and Shut Down…
    • Insert Line Break
    • Interrupt Kernel
    • New Console
    • Restart Kernel…
    • Run Cell (forced)
    • Run Cell (unforced)
    • Show All Kernel Activity
    • Debugger
    • Breakpoints on exception
    • Evaluate Code
      Evaluate Code
    • Next
      Next
      F10
    • Pause
      Pause
      F9
    • Step In
      Step In
      F11
    • Step Out
      Step Out
      Shift+F11
    • Terminate
      Terminate
      Shift+F9
    • Display Languages
    • English
      English
    • File Operations
    • Autosave Documents
    • Download
      Download the file to your computer
    • Reload Notebook from Disk
      Reload contents from disk
    • Revert Notebook to Checkpoint…
      Revert contents to previous checkpoint
    • Save Notebook
      Save and create checkpoint
      Ctrl+S
    • Save Notebook As…
      Save with new path
      Ctrl+Shift+S
    • Trust HTML File
      Whether the HTML file is trusted. Trusting the file allows scripts to run in it, which may result in security risks. Only enable for files you trust.
    • Help
    • About Jupyter Notebook
    • Launch Jupyter Notebook File Browser
    • Show Keyboard Shortcuts
      Show relevant keyboard shortcuts for the current active widget
      Ctrl+Shift+H
    • Image Viewer
    • Flip image horizontally
      H
    • Flip image vertically
      V
    • Invert Colors
      I
    • Reset Image
      0
    • Rotate Clockwise
      ]
    • Rotate Counterclockwise
      [
    • Zoom In
      =
    • Zoom Out
      -
    • Kernel Operations
    • Shut Down All Kernels…
    • Main Area
    • Close All Other Tabs
    • Close Tab
      Alt+W
    • Close Tabs to Right
    • End Search
      Esc
    • Find Next
      Ctrl+G
    • Find Previous
      Ctrl+Shift+G
    • Find…
      Ctrl+F
    • Log Out
      Log out of JupyterLab
    • Shut Down
      Shut down JupyterLab
    • Mode
    • Toggle Zen Mode
    • Notebook Cell Operations
    • Change to Code Cell Type
      Y
    • Change to Heading 1
      1
    • Change to Heading 2
      2
    • Change to Heading 3
      3
    • Change to Heading 4
      4
    • Change to Heading 5
      5
    • Change to Heading 6
      6
    • Change to Markdown Cell Type
      M
    • Change to Raw Cell Type
      R
    • Clear Cell Output
      Clear outputs for the selected cells
    • Collapse All Code
    • Collapse All Outputs
    • Collapse Selected Code
    • Collapse Selected Outputs
    • Copy Cell
      Copy this cell
      C
    • Cut Cell
      Cut this cell
      X
    • Delete Cell
      Delete this cell
      D, D
    • Disable Scrolling for Outputs
    • Enable Scrolling for Outputs
    • Expand All Code
    • Expand All Outputs
    • Expand Selected Code
    • Expand Selected Outputs
    • Extend Selection Above
      Shift+K
    • Extend Selection Below
      Shift+J
    • Extend Selection to Bottom
      Shift+End
    • Extend Selection to Top
      Shift+Home
    • Insert Cell Above
      Insert a cell above
      A
    • Insert Cell Below
      Insert a cell below
      B
    • Insert Heading Above Current Heading
      Shift+A
    • Insert Heading Below Current Heading
      Shift+B
    • Merge Cell Above
      Ctrl+Backspace
    • Merge Cell Below
      Ctrl+Shift+M
    • Merge Selected Cells
      Shift+M
    • Move Cell Down
      Move this cell down
      Ctrl+Shift+Down
    • Move Cell Up
      Move this cell up
      Ctrl+Shift+Up
    • Paste Cell Above
      Paste this cell from the clipboard
    • Paste Cell and Replace
    • Paste Cell Below
      Paste this cell from the clipboard
      V
    • Redo Cell Operation
      Shift+Z
    • Render Side-by-Side
      Shift+R
    • Run Selected Cell
      Run this cell and advance
      Shift+Enter
    • Run Selected Cell and Do not Advance
      Ctrl+Enter
    • Run Selected Cell and Insert Below
      Alt+Enter
    • Run Selected Text or Current Line in Console
    • Select Cell Above
      K
    • Select Cell Below
      J
    • Select Heading Above or Collapse Heading
      Left
    • Select Heading Below or Expand Heading
      Right
    • Set side-by-side ratio
    • Split Cell
      Ctrl+Shift+-
    • Undo Cell Operation
      Z
    • Notebook Operations
    • Change Kernel…
    • Clear Outputs of All Cells
      Clear all outputs of all cells
    • Close and Shut Down Notebook
    • Collapse All Headings
      Ctrl+Shift+Left
    • Deselect All Cells
    • Enter Command Mode
      Ctrl+M
    • Enter Edit Mode
      Enter
    • Expand All Headings
      Ctrl+Shift+Right
    • Interrupt Kernel
      Interrupt the kernel
    • New Console for Notebook
    • New Notebook
      Create a new notebook
    • Open with Panel in New Browser Tab
    • Preview Notebook with Panel
    • Reconnect to Kernel
    • Render All Markdown Cells
    • Restart Kernel and Clear Outputs of All Cells…
      Restart the kernel and clear all outputs of all cells
    • Restart Kernel and Debug…
      Restart Kernel and Debug…
    • Restart Kernel and Run All Cells…
      Restart the kernel and run all cells
    • Restart Kernel and Run up to Selected Cell…
    • Restart Kernel…
      Restart the kernel
    • Run All Above Selected Cell
    • Run All Cells
      Run all cells
    • Run Selected Cell and All Below
    • Save and Export Notebook: Asciidoc
    • Save and Export Notebook: Executable Script
    • Save and Export Notebook: HTML
    • Save and Export Notebook: LaTeX
    • Save and Export Notebook: Markdown
    • Save and Export Notebook: PDF
    • Save and Export Notebook: Qtpdf
    • Save and Export Notebook: Qtpng
    • Save and Export Notebook: ReStructured Text
    • Save and Export Notebook: Reveal.js Slides
    • Save and Export Notebook: Webpdf
    • Select All Cells
      Ctrl+A
    • Show Line Numbers
    • Toggle Collapse Notebook Heading
    • Trust Notebook
    • Other
    • Open in JupyterLab
      JupyterLab
    • Terminal
    • Decrease Terminal Font Size
    • Increase Terminal Font Size
    • New Terminal
      Start a new terminal session
    • Refresh Terminal
      Refresh the current terminal session
    • Use Terminal Theme: Dark
      Set the terminal theme
    • Use Terminal Theme: Inherit
      Set the terminal theme
    • Use Terminal Theme: Light
      Set the terminal theme
    • Text Editor
    • Decrease Font Size
    • Increase Font Size
    • New Markdown File
      Create a new markdown file
    • New Python File
      Create a new Python file
    • New Text File
      Create a new text file
    • Spaces: 1
    • Spaces: 2
    • Spaces: 4
    • Spaces: 4
    • Spaces: 8
    • Theme
    • Decrease Code Font Size
    • Decrease Content Font Size
    • Decrease UI Font Size
    • Increase Code Font Size
    • Increase Content Font Size
    • Increase UI Font Size
    • Theme Scrollbars
    • Use Theme: JupyterLab Dark
    • Use Theme: JupyterLab Light
    • View
    • File Browser
    • Open JupyterLab
    • Show Anaconda Assistant
      Show Show Anaconda Assistant in the right sidebar
    • Show Debugger
      Show Show Debugger in the right sidebar
    • Show Header
    • Show Notebook Tools
      Show Show Notebook Tools in the right sidebar
    • Show Table of Contents
      Show Show Table of Contents in the left sidebar